spin_unlock(&netif->tx_lock);
pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-
+
+#if 0
/*
 * Scheduling checks must happen after the above response is posted.
 * This avoids a possible race with a guest OS on another CPU.
 */
if ( (netif->tx_req_cons != netif->tx->req_prod) &&
((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
add_to_net_schedule_list_tail(netif);
+#endif
netif_put(netif);
netif_put(netif);
continue;
}
- rmb(); /* Ensure that we see the request. */
+
+ netif->tx->req_cons = ++netif->tx_req_cons;
+
+ /*
+ * 1. Ensure that we see the request when we copy it.
+ * 2. Ensure that frontend sees updated req_cons before we check
+ * for more work to schedule.
+ */
+ mb();
+
memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
sizeof(txreq));
- netif->tx_req_cons++;
#if 0
/* Credit-based scheduling. */
np->stats.tx_packets++;
/* Only notify Xen if there are no outstanding responses. */
+ /*
+ * KAF (16/9/04): Checking outstanding responses is unsafe, as pending work
+ * may be dependent on packets not yet seen by the backend (e.g., it may
+ * have a partially-assembled fragmented IP packet). For now, the check is
+ * more conservative -- has the backend seen all previous requests?
+ */
mb();
- if ( np->tx->resp_prod == i )
+ if ( np->tx->req_cons/*resp_prod*/ == i )
notify_via_evtchn(np->evtchn);
return 0;
/*
* Frontend places packets into ring at tx_req_prod.
* Frontend receives event when tx_resp_prod passes tx_event.
+ * 'req_cons' is a shadow of the backend's request consumer -- the frontend
+ * may use it to determine if all queued packets have been seen by the
+ * backend.
*/
NETIF_RING_IDX req_prod; /* 0 */
- NETIF_RING_IDX resp_prod; /* 4 */
- NETIF_RING_IDX event; /* 8 */
- union { /* 12 */
+ NETIF_RING_IDX req_cons; /* 4 */
+ NETIF_RING_IDX resp_prod; /* 8 */
+ NETIF_RING_IDX event; /* 12 */
+ union { /* 16 */
netif_tx_request_t req;
netif_tx_response_t resp;
} PACKED ring[NETIF_TX_RING_SIZE];